import os
import cv2
import numpy as np
from moviepy.editor import VideoFileClip
from IPython.display import HTML
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
# Tweak matplotlib defaults for image display: bilinear interpolation and
# fully transparent spines/axes/ticks so plotted images show no frame.
# kill axis in rcParams
#print(mpl.rcParams.keys())
mpl.rc('image', interpolation='bilinear')
mpl.rc('axes.spines',top=False,bottom=False,left=False,right=False);
mpl.rc('axes',facecolor=(1,1,1,0),edgecolor=(1,1,1,0));
mpl.rc(('xtick','ytick'),color=(1,1,1,0));
# enlarge default figure size
mpl.rc('figure', figsize=(10, 5))
Magic constants used throughout this notebook:
# number of chessboard inner corners ("joints") in each direction
NX = 9
NY = 6
# directory with camera calibration images
CAM_CAL_DIR = "camera_cal"
# directory for output images
OUT_DIR = "output_images"
# directory with test images
IN_DIR = "test_images"
The following block performs camera calibration using the OpenCV framework and the directory of calibration images supplied with the task.
def extractCornersOne(path, bPlot=False):
    """Detect the inner chessboard corners in a single calibration image.

    Args:
        path: path to the calibration image file.
        bPlot: when True, plot the image with the detected corner grid drawn.

    Returns:
        (corners, shape): the detected corner coordinates as returned by
        cv2.findChessboardCorners (None when detection failed) and the
        (rows, cols) shape of the grayscale image.
    """
    print(path)
    img = cv2.imread(path)
    # Corner detection operates on a single-channel image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (NX, NY), None)
    if not ret:
        corners = None
        # Show the failed image so the bad calibration shot can be inspected
        # (removed the unused `fig` local; plt.figure() is called for the
        # side effect of opening a new figure)
        plt.figure()
        plt.title('Failed to find corners grid of ({},{}) in image {}:'.format(NX, NY, path))
        plt.imshow(img)
    elif bPlot:
        cv2.drawChessboardCorners(img, (NX, NY), corners, ret)
        plt.figure()
        plt.title('Corners grid of ({},{}) found in image {}:'.format(NX, NY, path))
        plt.imshow(img)
    return corners, gray.shape
def extractCornersDir(path):
    """Detect chessboard corners in every image of a directory.

    Args:
        path: directory containing the calibration images.

    Returns:
        (corners, shapeAll): list of per-image corner arrays (images where
        detection failed are skipped) and the common (rows, cols) shape of
        the images (taken from the first successful one).
    """
    corners = []
    i = 0
    shapeAll = None
    for img in os.listdir(path):
        # BUG FIX: was hard-coded to CAM_CAL_DIR, silently ignoring the
        # `path` argument; also use os.path.join for portability.
        # bPlot is True only for the second successfully processed image.
        cornersImg, shape = extractCornersOne(os.path.join(path, img), i == 1)
        if cornersImg is None:
            continue
        if shapeAll is None:
            shapeAll = shape
        elif shapeAll != shape:
            print('Just FYI the shape of {} is {} does not match previous images shape {}'.format(img, shape, shapeAll))
        corners.append(cornersImg)
        i += 1
    return corners, shapeAll
def calibrateCameraUsingImages(path):
    """Calibrate the camera from the chessboard images found in `path`.

    Returns:
        (Mcam, dist): the camera matrix and the lens distortion coefficients.
    """
    # Fixed representation of the chessboard corner grid in object space
    # (all corners lie in the z = 0 plane)
    objGrid = np.zeros((NX * NY, 3), np.float32)
    objGrid[:, :2] = np.mgrid[0:NX, 0:NY].T.reshape(-1, 2)
    ptsImg, imgShape = extractCornersDir(path)
    # One copy of the object-space grid per successfully processed image
    ptsObj = [objGrid] * len(ptsImg)
    # Image size for calibrateCamera is (width, height), hence the reversal
    _, Mcam, dist, _, _ = cv2.calibrateCamera(ptsObj, ptsImg, imgShape[::-1], None, None)
    return Mcam, dist
# Run the calibration once; Mcam and dist are used as module-level state by
# the rest of the pipeline.
Mcam, dist = calibrateCameraUsingImages(CAM_CAL_DIR)
print('Camera matrix: {}'.format(Mcam))
print('Lens distortion coefficients: {}'.format(dist))
Checking how the most warped calibration image looks after undistortion:
# Sanity check: undistort the most warped calibration image, save it to the
# output directory and display it.
_img = cv2.imread(CAM_CAL_DIR + '/calibration1.jpg')
_img = cv2.cvtColor(_img, cv2.COLOR_BGR2GRAY)
_img = cv2.undistort(_img, Mcam, dist, None, Mcam)
cv2.imwrite(OUT_DIR + '/undistorted.jpg', _img)
plt.imshow(_img, cmap='gray')
The result could be better, but we don't have more chessboard images from this camera, so there is nothing we can do about these imperfections — let's proceed.
def getEdgeFeaturesMap(img):
    """Build a binary map of edge features from gradient magnitude and direction.

    Args:
        img: RGB image.

    Returns:
        Binary map (same dtype as the grayscale input): 1 where the pixel
        passes both the magnitude and the direction thresholds, 0 elsewhere.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    SOBEL_KERNEL = 5
    sobelX = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=SOBEL_KERNEL)
    sobelY = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=SOBEL_KERNEL)
    gradMag = np.sqrt(sobelX**2 + sobelY**2)
    # Direction of the absolute gradient, folded into [0, pi/2]
    absGradDir = np.arctan2(np.absolute(sobelY), np.absolute(sobelX))
    # Normalize magnitude to 0..255.
    # BUG FIX: guard against an all-flat image where max == 0, which
    # previously divided by zero and cast NaNs to uint8.
    maxMag = np.max(gradMag)
    if maxMag > 0:
        gradMag = (255 * gradMag / maxMag).astype(np.uint8)
    else:
        gradMag = gradMag.astype(np.uint8)
    # Thresholds tuned on the project test images
    TH_MAG_MIN = 30
    TH_MAG_MAX = 100
    TH_DIR_MIN = 0.25 * np.pi/2
    TH_DIR_MAX = 0.85 * np.pi/2
    out = np.ones_like(gray)
    out[(gradMag < TH_MAG_MIN) | (gradMag > TH_MAG_MAX)] = 0
    out[(absGradDir < TH_DIR_MIN) | (absGradDir > TH_DIR_MAX)] = 0
    return out
def getColorSpaceMap(img):
    """Build a binary map of lane-colored pixels using HLS thresholds.

    Args:
        img: RGB image.

    Returns:
        Binary map: 1 where both the saturation and the hue channel fall
        inside their threshold bands, 0 elsewhere.
    """
    imgHls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    hue = imgHls[:, :, 0]
    sat = imgHls[:, :, 2]
    # Threshold bands tuned on the project test images
    TH_S_MIN, TH_S_MAX = 90, 255
    TH_H_MIN, TH_H_MAX = 15, 100
    # Start from all-ones, then zero out pixels outside either band
    out = np.ones_like(sat)
    out[(sat < TH_S_MIN) | (sat > TH_S_MAX)] = 0
    out[(hue < TH_H_MIN) | (hue > TH_H_MAX)] = 0
    return out
def getFeaturesMap(img):
    """Combine edge and color feature maps into one boolean map (logical OR)."""
    edges = getEdgeFeaturesMap(img)
    colors = getColorSpaceMap(img)
    return (edges == 1) | (colors == 1)
def visualizeBinarization():
    """Plot a test image next to its edge, color and combined feature maps."""
    fname = IN_DIR + '/straight_lines1.jpg'
    img = cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2RGB)
    # One (image, colormap, title) entry per panel of the 2x2 grid
    panels = [
        (img, None, 'Original'),
        (getEdgeFeaturesMap(img), 'gray', 'Edge features'),
        (getColorSpaceMap(img), 'gray', 'Color features'),
        (getFeaturesMap(img), 'gray', 'Edge AND Color features'),
    ]
    _, axarr = plt.subplots(2, 2, figsize=(15, 8))
    for ax, (image, cmap, title) in zip(axarr.flatten(), panels):
        ax.imshow(image, cmap=cmap)
        ax.set_title(title)
visualizeBinarization()
def getRoadSrcPoints(imgSz):
    """Return the 4 source corners for the road perspective transform.

    The corners form a trapezoid over the lane area, placed at fixed
    fractions of the image size.

    Args:
        imgSz: image shape as (rows, cols, ...).

    Returns:
        float32 array of 4 points, ordered top-left, bottom-left,
        bottom-right, top-right.
    """
    h, w = imgSz[0], imgSz[1]
    bottomY = int(0.93 * h)
    topY = int(0.63 * h)
    bottomLeft = (int(0.00 * w), bottomY)
    bottomRight = (int(1.00 * w), bottomY)
    topLeft = (int(0.435 * w), topY)
    topRight = (int(0.565 * w), topY)
    return np.float32([topLeft, bottomLeft, bottomRight, topRight])
def getRoadDstPoints(imgSz):
    """Return the 4 destination corners (full-image rectangle) matching the
    point ordering of getRoadSrcPoints."""
    bottom = imgSz[0] - 1
    right = imgSz[1] - 1
    return np.float32([(0, 0), (0, bottom), (right, bottom), (right, 0)])
def getRectifiedRoadTransform(imgSz):
    """Compute the forward and inverse perspective transforms that map the
    road trapezoid onto the full image rectangle (bird's-eye view).

    Returns:
        (Mwarp, MwarpInv): forward and inverse 3x3 transform matrices.
    """
    src = getRoadSrcPoints(imgSz)
    dst = getRoadDstPoints(imgSz)
    forward = cv2.getPerspectiveTransform(src, dst)
    inverse = cv2.getPerspectiveTransform(dst, src)
    return forward, inverse
def getRectifiedRoad(img):
    """Warp `img` to the rectified (top-down) road view, same output size."""
    Mwarp, _ = getRectifiedRoadTransform(img.shape)
    dstSize = (img.shape[1], img.shape[0])
    return cv2.warpPerspective(img, Mwarp, dstSize)
def drawSrcRoadContour(img, srcArr):
    """Draw the closed 4-point road contour onto `img` in red (in place)."""
    pts = [tuple(p) for p in srcArr]
    # Connect consecutive corners, wrapping back to the first one
    for a, b in zip(pts, pts[1:] + pts[:1]):
        cv2.line(img, a, b, (255, 0, 0), 5)
def visualizePerspectiveTransform():
    """Show the perspective rectification on a test image and its feature map."""
    fname = IN_DIR + '/test2.jpg'
    img = cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2RGB)
    img = cv2.undistort(img, Mcam, dist, None, Mcam)
    warped = getRectifiedRoad(img)
    features = getFeaturesMap(img)
    featuresWarped = getRectifiedRoad(255 * features.astype(np.uint8))
    # Overlay the source trapezoid on the undistorted original
    # (done last — cv2.line mutates img in place)
    drawSrcRoadContour(img, getRoadSrcPoints(img.shape))
    panels = [
        (img, None, 'Original'),
        (warped, None, 'Original rectified'),
        (features, 'gray', 'Features'),
        (featuresWarped, 'gray', 'Features rectified'),
    ]
    _, axarr = plt.subplots(2, 2, figsize=(15, 8))
    for ax, (image, cmap, title) in zip(axarr.flatten(), panels):
        ax.imshow(image, cmap=cmap)
        ax.set_title(title)
Below is a visualization of the perspective transform operation.
visualizePerspectiveTransform()
# Code for this function is mostly taken from the lecture notes
def fitLineInitialization(binary_warped, bVis):
    """Find left/right lane-line polynomials from scratch via sliding windows.

    A column histogram of the bottom half of the warped binary image locates
    the two lane-line bases; nine stacked windows then track each line
    upward, re-centering on the mean x of the pixels they capture. Finally a
    second-order polynomial x = A*y^2 + B*y + C is fitted to each side.

    Args:
        binary_warped: top-down (rectified) binary feature image.
        bVis: when True, also build an RGB visualization image.

    Returns:
        (left_fit, right_fit, out_img): polynomial coefficients for each
        line and the visualization image (None when bVis is False).
    """
    # Histogram of the bottom half: its two peaks mark the lane-line bases
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:, :], axis=0)
    # Output image to draw on and visualize the result
    out_img = None
    if bVis:
        out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    # Peak of each half of the histogram = starting x for each line.
    # BUG FIX: np.int(...) was removed in NumPy 1.24; plain int / floor
    # division are used throughout instead.
    midpoint = histogram.shape[0] // 2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    nwindows = 9                                       # number of sliding windows
    window_height = binary_warped.shape[0] // nwindows
    # Coordinates of all nonzero (feature) pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current window centers, updated as we climb the image
    leftx_current = leftx_base
    rightx_current = rightx_base
    margin = 100    # window half-width in pixels
    minpix = 50     # minimum pixels found to re-center the next window
    left_lane_inds = []
    right_lane_inds = []
    for window in range(nwindows):
        # Window bounds in y (stacked bottom-up) and x (around the centers)
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        if bVis:
            cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
            cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Indices of feature pixels falling inside each window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Re-center the next window on the mean x if enough pixels were found
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit x = A*y^2 + B*y + C for each line
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    if bVis:
        # Evaluate the fits over the full image height for plotting
        ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
        ptsLeft = np.column_stack((left_fitx, ploty)).astype(np.int32)
        ptsRight = np.column_stack((right_fitx, ploty)).astype(np.int32)
        # Color the contributing pixels and draw the fitted curves
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
        cv2.polylines(out_img, [ptsLeft], False, (255, 255, 0), 5)
        cv2.polylines(out_img, [ptsRight], False, (255, 255, 0), 5)
    return left_fit, right_fit, out_img
# Code for this function is mostly taken from the lecture notes
def fitLineApproximation(binary_warped, left_fit, right_fit, bVis):
    """Refine lane-line polynomials using the fits from a previous frame.

    Instead of a full sliding-window search, feature pixels are collected
    within +/- margin pixels of the previous frame's fitted curves and a
    new second-order polynomial is fitted to them. If either side yields no
    pixels, the previous fits are returned unchanged.

    Args:
        binary_warped: top-down binary feature image of the current frame.
        left_fit, right_fit: polynomial coefficients [A, B, C] from the
            previous frame (x = A*y^2 + B*y + C in pixel space).
        bVis: when True, also build an RGB visualization image.

    Returns:
        (left_fit, right_fit, out_img): updated coefficients and the
        visualization image (None when bVis is False).
    """
    # Assume you now have a new warped binary image
    # from the next frame of video (also called "binary_warped")
    # It's now much easier to find line pixels!
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Search corridor half-width (pixels) around the previous fits
    margin = 100
    # Boolean masks: pixels within +/- margin of each previous curve
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
        left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
        left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
        right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
        right_fit[1]*nonzeroy + right_fit[2] + margin)))
    # Again, extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each; if either side captured no
    # pixels, keep the incoming fits (best-effort fallback)
    if not(leftx.size == 0 or lefty.size == 0 or rightx.size == 0 or righty.size == 0):
        left_fit = np.polyfit(lefty, leftx, 2)
        right_fit = np.polyfit(righty, rightx, 2)
    else:
        print('Polyfit failure')
    out_img = None
    if bVis:
        # Generate x and y values for plotting
        ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
        # Create an image to draw on and an image to show the selection window
        out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
        window_img = np.zeros_like(out_img)
        # Color in left and right line pixels
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
        # Generate a polygon to illustrate the search window area
        # And recast the x and y points into usable format for cv2.fillPoly()
        left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
        left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,
            ploty])))])
        left_line_pts = np.hstack((left_line_window1, left_line_window2))
        right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
        right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,
            ploty])))])
        right_line_pts = np.hstack((right_line_window1, right_line_window2))
        # Draw the lane onto the warped blank image
        cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
        cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
        out_img = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
        ptsLeft = np.column_stack((left_fitx, ploty)).astype(np.int32)
        ptsRight = np.column_stack((right_fitx, ploty)).astype(np.int32)
        cv2.polylines(out_img, [ptsLeft], False, (255,255,0), 5)
        cv2.polylines(out_img, [ptsRight], False, (255,255,0), 5)
    return left_fit, right_fit, out_img
def calcLaneCurvatureMeters(imgFeatursWarped, left_fit, right_fit):
    """Compute the curvature radius (in meters) of both lane polynomials.

    The fits are x = A*y^2 + B*y + C in pixel coordinates of the warped
    image; the coefficients are converted to meter space before applying
    the curvature formula R = (1 + (2*A*y + B)^2)^1.5 / |2*A| at the bottom
    of the image (closest to the vehicle).

    BUG FIXES vs. the original:
      * the B coefficient was converted from fit[0] instead of fit[1];
      * the pixel->meter scale ratios were inverted
        (A_m = A * xm/ym^2 and B_m = B * xm/ym, not the reciprocals);
      * y_eval was left in pixels instead of meters.

    Args:
        imgFeatursWarped: warped binary image (only its height is used).
        left_fit, right_fit: second-order polynomial coefficients [A, B, C].

    Returns:
        (left_curverad, right_curverad) in meters.
    """
    ym_per_pix = 30/720   # meters per pixel in y dimension
    xm_per_pix = 3.7/700  # meters per pixel in x dimension
    # Evaluate at the bottom row of the image, converted to meters
    y_eval = (imgFeatursWarped.shape[0] - 1) * ym_per_pix
    curvatures = []
    for fit in (left_fit, right_fit):
        # x_m = xm*x_pix and y_m = ym*y_pix  =>  A_m = A*xm/ym^2, B_m = B*xm/ym
        A = fit[0] * xm_per_pix / (ym_per_pix ** 2)
        B = fit[1] * xm_per_pix / ym_per_pix
        curvatures.append(((1 + (2*A*y_eval + B)**2)**1.5) / np.absolute(2*A))
    return curvatures[0], curvatures[1]
def visualizeLaneFindingInitialization():
    """Demonstrate both line-fitting strategies on a test image and print the
    lane curvature estimates."""
    fname = IN_DIR + '/test2.jpg'
    img = cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2RGB)
    img = cv2.undistort(img, Mcam, dist, None, Mcam)
    imgFeatures = getFeaturesMap(img)
    imgFeatursWarped = getRectifiedRoad(imgFeatures.astype(np.uint8))
    leftFit, rightFit, imgVis = fitLineInitialization(imgFeatursWarped, True)
    # Second pass: search around the fits produced by the first pass
    # (only the visualization is used; the refined fits are discarded here)
    _, _, imgVis2 = fitLineApproximation(imgFeatursWarped, leftFit, rightFit, True)
    leftCurv, rightCurv = calcLaneCurvatureMeters(imgFeatursWarped, leftFit, rightFit)
    # BUG FIX: both placeholders used index 0, printing the left value twice
    print('Lane curvatures in meters: left={0:.2f} right={1:.2f}'.format(leftCurv, rightCurv))
    f, axarr = plt.subplots(1, 3, figsize=(15, 8))
    axarr[0].imshow(img)
    axarr[0].set_title('Original')
    axarr[1].imshow(imgVis)
    axarr[1].set_title('Lane equation parameters found first time')
    axarr[2].imshow(imgVis2)
    axarr[2].set_title('Lane equation parameters found from approximation')
visualizeLaneFindingInitialization()
def convertLaneFitsToSrcContour(img, leftFit, rightFit):
    """Project the fitted lane polynomials back into source-image space.

    Both polynomials are evaluated over the full image height; the right
    edge is reversed so that concatenating left + right forms a closed
    outline, which is then mapped through the inverse perspective transform.

    Args:
        img: image whose shape defines the warped coordinate frame.
        leftFit, rightFit: second-order polynomial coefficients [A, B, C].

    Returns:
        int32 array of (x, y) contour points in source-image coordinates.
    """
    plotY = np.linspace(0, img.shape[0]-1, img.shape[0]).reshape(-1, 1)
    leftFitX = leftFit[0]*plotY**2 + leftFit[1]*plotY + leftFit[2]
    leftFitY = plotY
    # Reverse the right edge so the concatenated outline is a closed loop
    rightFitX = np.flipud(rightFit[0]*plotY**2 + rightFit[1]*plotY + rightFit[2])
    rightFitY = np.flipud(plotY)
    _, MwarpInv = getRectifiedRoadTransform(img.shape)
    # Prepare a closed outline of the detected lane
    plotX = np.vstack((leftFitX, rightFitX))
    plotYAll = np.vstack((leftFitY, rightFitY))
    # Homogeneous coordinates.
    # BUG FIX: dtype was np.float, which was removed in NumPy 1.24.
    ptsDst = np.column_stack((plotX, plotYAll, np.ones(plotYAll.shape, dtype=float)))
    ptsSrc = np.matmul(ptsDst, MwarpInv.T)
    # Perspective divide by the homogeneous coordinate
    ptsSrc = ptsSrc / ptsSrc[:, 2].reshape(-1, 1)
    return ptsSrc[:, 0:2].astype(np.int32)
def drawCountourSrc(img, leftFit, rightFit):
    """Return `img` with the detected lane area overlaid as translucent green."""
    contour = convertLaneFitsToSrcContour(img, leftFit, rightFit)
    overlay = np.zeros_like(img)
    cv2.fillPoly(overlay, [contour], (0, 255, 0))
    return cv2.addWeighted(img, 1, overlay, 0.3, 0)
def visualizeLaneFindingSrcCountour():
    """Show a test image next to the lane area projected back onto it."""
    fname = IN_DIR + '/test2.jpg'
    img = cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2RGB)
    img = cv2.undistort(img, Mcam, dist, None, Mcam)
    featuresWarped = getRectifiedRoad(getFeaturesMap(img).astype(np.uint8))
    leftFit, rightFit, _ = fitLineInitialization(featuresWarped, True)
    # Project the lane outline back and blend it over the original image
    contour = convertLaneFitsToSrcContour(featuresWarped, leftFit, rightFit)
    laneMask = np.zeros_like(img)
    cv2.fillPoly(laneMask, [contour], (0, 255, 0))
    overlaid = cv2.addWeighted(img, 1, laneMask, 0.3, 0)
    _, axarr = plt.subplots(1, 2, figsize=(15, 8))
    axarr[0].imshow(img)
    axarr[0].set_title('Original')
    axarr[1].imshow(overlaid)
    axarr[1].set_title('Lane area overlay')
visualizeLaneFindingSrcCountour()
def processImage(imgDist, leftFitIn=None, rightFitIn=None):
    """Run the complete single-frame lane pipeline on a distorted RGB frame.

    Args:
        imgDist: raw (distorted) RGB frame.
        leftFitIn, rightFitIn: lane fits from a previous frame; when None a
            fresh sliding-window search is performed, otherwise the faster
            search around the previous fits is used.

    Returns:
        (undistorted image, feature map, warped feature map, lane-fit
        visualization, lane-overlay image, left curvature, right curvature,
        left fit, right fit).
    """
    undistorted = cv2.undistort(imgDist, Mcam, dist, None, Mcam)
    features = getFeaturesMap(undistorted)
    featuresWarped = getRectifiedRoad(features.astype(np.uint8))
    if leftFitIn is None:
        leftFit, rightFit, lanesVis = fitLineInitialization(featuresWarped, True)
    else:
        leftFit, rightFit, lanesVis = fitLineApproximation(featuresWarped, leftFitIn, rightFitIn, True)
    leftCurv, rightCurv = calcLaneCurvatureMeters(featuresWarped, leftFit, rightFit)
    overlay = drawCountourSrc(undistorted, leftFit, rightFit)
    return (undistorted, features, featuresWarped, lanesVis, overlay,
            leftCurv, rightCurv, leftFit, rightFit)
def testPipeline():
    """Run the full pipeline on every test image and plot all pipeline stages."""
    listImages = os.listdir(IN_DIR)
    # Six panels per image, laid out as two rows of three
    f, axarr = plt.subplots(len(listImages) * 2, 3, figsize=(15, 40))
    axarr = axarr.flatten()
    for i, fname in enumerate(listImages):
        imgDist = cv2.cvtColor(cv2.imread(os.path.join(IN_DIR, fname)), cv2.COLOR_BGR2RGB)
        img, imgFeatures, imgFeatursWarped, imgLanesInit, imgContourSrc, leftCurv, rightCurv, _, _ = \
            processImage(imgDist)
        # Panels: distorted, undistorted, features, warped features,
        # lane-fit visualization (with curvature title), lane overlay
        axarr[i*6+0].imshow(imgDist)
        axarr[i*6+1].imshow(img)
        axarr[i*6+2].imshow(imgFeatures, cmap='gray')
        axarr[i*6+3].imshow(imgFeatursWarped, cmap='gray')
        axarr[i*6+4].imshow(imgLanesInit)
        # BUG FIX: both placeholders used index 0, showing the left value twice
        axarr[i*6+4].set_title('Lane curvatures (meters): left={0:.2f} right={1:.2f}'.format(leftCurv, rightCurv))
        axarr[i*6+5].imshow(imgContourSrc)
    f.tight_layout()
testPipeline()
# Input/output file names for the three project videos
VID_BASE_IN = 'project_video.mp4'
VID_BASE_OUT = 'project_video_out.mp4'
VID_CHAL_IN = 'challenge_video.mp4'
VID_CHAL_OUT = 'challenge_video_out.mp4'
VID_HARD_IN = 'harder_challenge_video.mp4'
VID_HARD_OUT = 'harder_challenge_video_out.mp4'
def processVideo(nameIn, nameOut, bDebugVis, startSec=None, endSec=None):
    """Run the lane-detection pipeline over a video file.

    Args:
        nameIn: input video path.
        nameOut: output video path.
        bDebugVis: when True, write a 3x2 mosaic of intermediate pipeline
            stages instead of just the final lane overlay.
        startSec, endSec: optional processing window in seconds.
    """
    vin = cv2.VideoCapture(nameIn)
    fps = vin.get(cv2.CAP_PROP_FPS)
    vout = None
    i = -1
    # Previous-frame fits drive the faster approximation search and the
    # temporal smoothing below
    lastLeftFit, lastRightFit = None, None
    try:
        while vin.isOpened():
            i += 1
            ret, frame = vin.read()
            if not ret:
                break
            if startSec is not None and i < startSec * fps:
                continue
            # BUG FIX: was `continue`, which kept decoding the rest of the
            # file frame by frame after the window had already ended
            if endSec is not None and i > endSec * fps:
                break
            if vout is None:
                outDims = (frame.shape[1], frame.shape[0])
                if bDebugVis:
                    # Debug mosaic is 3 panels wide and 2 panels tall
                    outDims = (3 * frame.shape[1], 2 * frame.shape[0])
                vout = cv2.VideoWriter(nameOut, cv2.VideoWriter_fourcc(*'XVID'), fps, outDims, True)
            imgDist = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            img, imgFeatures, imgFeatursWarped, imgLanesInit, imgContourSrc, leftCurv, rightCurv, leftFit, rightFit = \
                processImage(imgDist, lastLeftFit, lastRightFit)
            # Temporal filtration of the lane polynomials
            # (exponential moving average, heavily weighted toward history)
            if lastLeftFit is None:
                lastLeftFit = leftFit
                lastRightFit = rightFit
            else:
                lastLeftFit = 0.9 * lastLeftFit + 0.1 * leftFit
                lastRightFit = 0.9 * lastRightFit + 0.1 * rightFit
            if bDebugVis:
                imgFeatures = imgFeatures.astype(np.uint8) * 255
                imgFeatursWarped = imgFeatursWarped.astype(np.uint8) * 255
                imgFeaturesRGB = np.dstack((imgFeatures, imgFeatures, imgFeatures))
                imgFeatursWarpedRGB = np.dstack((imgFeatursWarped, imgFeatursWarped, imgFeatursWarped))
                imgTop = np.hstack((imgDist, img, imgFeaturesRGB))
                imgBottom = np.hstack((imgFeatursWarpedRGB, imgLanesInit, imgContourSrc))
                imgContourSrc = np.vstack([imgTop, imgBottom])
            vout.write(cv2.cvtColor(imgContourSrc, cv2.COLOR_RGB2BGR))
    finally:
        # Always release the capture/writer handles, even on an exception
        # (also fixes the non-idiomatic `== None` / `!= None` comparisons)
        vin.release()
        if vout is not None:
            vout.release()
# Commented-out runs below were used to debug specific time windows
#%time processVideo(VID_BASE_IN, VID_BASE_OUT, True, 37, 42)
#%time processVideo(VID_BASE_IN, VID_BASE_OUT, True)
# Process the full base project video (final overlay only, no debug mosaic)
%time processVideo(VID_BASE_IN, VID_BASE_OUT, False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(VID_BASE_OUT))
# Process seconds 6-12 of the harder challenge video with the debug mosaic
%time processVideo(VID_HARD_IN, VID_HARD_OUT, True, 6, 12)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(VID_HARD_OUT))